Here we go through the API documentation for TensorFlow and test various scenarios to get a better understanding of the mechanics of TensorFlow.


In [52]:
import tensorflow as tf
from pprint import pprint

Graph


In [2]:
# An op created outside any explicit graph context is attached to the
# process-wide default graph.
default_graph = tf.get_default_graph()
c = tf.constant(4.0)
assert c.graph is default_graph

In [7]:
# Build ops inside an explicitly constructed graph rather than the
# process default.
g = tf.Graph()
with g.as_default():
    c = tf.constant(30.0)
    assert c.graph is g

    # After finalize(), the graph becomes read-only: adding any new
    # operation raises an error.
    g.finalize()
    print(g.finalized)
    # Uncommenting the next line would raise that error:
    # d = tf.constant(10)


True

In [22]:
g = tf.Graph()
with g.as_default():
    lhs = tf.constant([[2]])
    rhs = tf.constant([[1]])

    # Ops created inside this context will only run after `lhs` and
    # `rhs` have executed.
    with g.control_dependencies([lhs, rhs]):
        product = tf.matmul(lhs, rhs)

    # Session() picks up the current default graph, i.e. `g`.
    with tf.Session() as sess:
        print(sess.run(product))


[[2]]

In [27]:
g = tf.Graph()
# BUG in the original: `g.device(...)` alone does NOT make `g` the default
# graph, so `tf.constant` still created its op in the process default graph
# with no device assignment — the GPU directive was silently ignored (which
# is also why it "worked" without a GPU). We need `g.as_default()` for the
# device directive to apply to the ops we create.
with g.as_default(), g.device('/gpu:0'):
    a = tf.constant([[2]])
    # allow_soft_placement lets TensorFlow fall back to the CPU when no
    # GPU (or no GPU kernel for the op) is available, so this cell also
    # runs on CPU-only machines.
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(graph=g, config=config) as sess:
        print(sess.run(a))


[[2]]

Another example from the API documentation (illustrative only, not executed here):

with g.device('/gpu:0'):
  # All operations constructed in this context will be placed
  # on GPU 0.
  with g.device(None):
    # All operations constructed in this context will have no
    # assigned device.
    pass

# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
  if n.type == "MatMul":
    return "/gpu:0"
  else:
    return "/cpu:0"

with g.device(matmul_on_gpu):
  # All operations of type "MatMul" constructed in this context
  # will be placed on GPU 0; all other operations will be placed
  # on CPU 0.
  pass

constants and variables


In [31]:
# With no explicit `name` argument, TensorFlow auto-generates one
# ("Const", "Const_1", ...) uniquified within the default graph — hence
# the suffix in the printed name after earlier cells created constants.
unnamed = tf.constant(1)
print(unnamed.op.name)


Const_13

In [42]:
with tf.Graph().as_default() as graph:
    # An explicit name is used verbatim when unique in the graph.
    top = tf.constant(1, name='y')
    assert top.op.name == 'y', top.op.name

    # name_scope prefixes the names of all ops created inside it.
    with graph.name_scope('some_scope'):
        scoped = tf.constant(2, name='a')
        assert scoped.op.name == 'some_scope/a', scoped.op.name

A full example


In [43]:
# NOTE: the assertions below depend on the exact op-creation order,
# because TensorFlow uniquifies duplicate names ("c" -> "c_1",
# "nested/inner" -> "nested/inner_1") in creation order.
with tf.Graph().as_default() as g:
  c = tf.constant(5.0, name="c")
  assert c.op.name == "c"
  # Same requested name "c": the graph uniquifies it to "c_1".
  c_1 = tf.constant(6.0, name="c")
  assert c_1.op.name == "c_1"

  # Creates a scope called "nested"; `scope` captures the full prefix
  # string "nested/".
  with g.name_scope("nested") as scope:
    nested_c = tf.constant(10.0, name="c")
    assert nested_c.op.name == "nested/c"

    # Creates a nested scope called "inner".
    with g.name_scope("inner"):
      nested_inner_c = tf.constant(20.0, name="c")
      assert nested_inner_c.op.name == "nested/inner/c"

    # Requesting "inner" again: the scope itself is uniquified to
    # "inner_1".
    with g.name_scope("inner"):
      nested_inner_1_c = tf.constant(30.0, name="c")
      assert nested_inner_1_c.op.name == "nested/inner_1/c"

      # A scope string ending in "/" (like the captured `scope`) is
      # treated as an absolute name scope, so this switches back to
      # the "nested/" prefix regardless of the current nesting.
      with g.name_scope(scope):
        nested_d = tf.constant(40.0, name="d")
        assert nested_d.op.name == "nested/d"

        # The empty string resets to the top-level (no prefix) scope.
        with g.name_scope(""):
          e = tf.constant(50.0, name="e")
          assert e.op.name == "e"

In [57]:
with tf.Graph().as_default() as g:
    c = tf.constant(5.0, name='c')
    d = tf.constant(4, name='d')

    # Collections happily store duplicates: add the same tensor thrice.
    for _ in range(3):
        g.add_to_collection('samosa', c)

    pprint(g.get_collection('samosa'))

    # get_collection() returns a copy, whereas get_collection_ref()
    # returns the live underlying list — mutating it in place adds or
    # removes tensors from the collection itself.
    list_ref = g.get_collection_ref('samosa')
    list_ref.append(d)
    pprint(g.get_collection('samosa'))


[<tf.Tensor 'c:0' shape=() dtype=float32>,
 <tf.Tensor 'c:0' shape=() dtype=float32>,
 <tf.Tensor 'c:0' shape=() dtype=float32>]
[<tf.Tensor 'c:0' shape=() dtype=float32>,
 <tf.Tensor 'c:0' shape=() dtype=float32>,
 <tf.Tensor 'c:0' shape=() dtype=float32>,
 <tf.Tensor 'd:0' shape=() dtype=int32>]

In [ ]: